Joelito committed on
Commit
ee2ae9b
1 Parent(s): 0b3b0fe

uploaded dataset files

Browse files
Files changed (5) hide show
  1. README.md +122 -0
  2. original_dataset.zip +3 -0
  3. prepare_data.py +59 -0
  4. test.jsonl.xz +3 -0
  5. train.jsonl.xz +3 -0
README.md ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dataset Card for LegalCaseDocumentSummarization
2
+
3
+ ## Table of Contents
4
+ - [Table of Contents](#table-of-contents)
5
+ - [Dataset Description](#dataset-description)
6
+ - [Dataset Summary](#dataset-summary)
7
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
8
+ - [Languages](#languages)
9
+ - [Dataset Structure](#dataset-structure)
10
+ - [Data Instances](#data-instances)
11
+ - [Data Fields](#data-fields)
12
+ - [Data Splits](#data-splits)
13
+ - [Dataset Creation](#dataset-creation)
14
+ - [Curation Rationale](#curation-rationale)
15
+ - [Source Data](#source-data)
16
+ - [Annotations](#annotations)
17
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
18
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
19
+ - [Social Impact of Dataset](#social-impact-of-dataset)
20
+ - [Discussion of Biases](#discussion-of-biases)
21
+ - [Other Known Limitations](#other-known-limitations)
22
+ - [Additional Information](#additional-information)
23
+ - [Dataset Curators](#dataset-curators)
24
+ - [Licensing Information](#licensing-information)
25
+ - [Citation Information](#citation-information)
26
+ - [Contributions](#contributions)
27
+
28
+ ## Dataset Description
29
+
30
+ - **Homepage:** [GitHub](https://github.com/Law-AI/summarization)
31
+ - **Repository:** [Zenodo](https://zenodo.org/record/7152317#.Y69PkeKZODW)
32
+ - **Paper:**
33
+ - **Leaderboard:**
34
+ - **Point of Contact:**
35
+
36
+ ### Dataset Summary
37
+
38
+ [More Information Needed]
39
+
40
+ ### Supported Tasks and Leaderboards
41
+
42
+ [More Information Needed]
43
+
44
+ ### Languages
45
+
46
+ [More Information Needed]
47
+
48
+ ## Dataset Structure
49
+
50
+ ### Data Instances
51
+
52
+ [More Information Needed]
53
+
54
+ ### Data Fields
55
+
56
+ [More Information Needed]
57
+
58
+ ### Data Splits
59
+
60
+ [More Information Needed]
61
+
62
+ ## Dataset Creation
63
+
64
+ ### Curation Rationale
65
+
66
+ [More Information Needed]
67
+
68
+ ### Source Data
69
+
70
+ #### Initial Data Collection and Normalization
71
+
72
+ [More Information Needed]
73
+
74
+ #### Who are the source language producers?
75
+
76
+ [More Information Needed]
77
+
78
+ ### Annotations
79
+
80
+ #### Annotation process
81
+
82
+ [More Information Needed]
83
+
84
+ #### Who are the annotators?
85
+
86
+ [More Information Needed]
87
+
88
+ ### Personal and Sensitive Information
89
+
90
+ [More Information Needed]
91
+
92
+ ## Considerations for Using the Data
93
+
94
+ ### Social Impact of Dataset
95
+
96
+ [More Information Needed]
97
+
98
+ ### Discussion of Biases
99
+
100
+ [More Information Needed]
101
+
102
+ ### Other Known Limitations
103
+
104
+ [More Information Needed]
105
+
106
+ ## Additional Information
107
+
108
+ ### Dataset Curators
109
+
110
+ [More Information Needed]
111
+
112
+ ### Licensing Information
113
+
114
+ [More Information Needed]
115
+
116
+ ### Citation Information
117
+
118
+ [More Information Needed]
119
+
120
+ ### Contributions
121
+
122
+ Thanks to [@JoelNiklaus](https://github.com/JoelNiklaus) for adding this dataset.
original_dataset.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6141613c07eb5a16f0c3f0da0aec974bd218ce12d15035b9aba37dda3e7e1b96
3
+ size 105247667
prepare_data.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import lzma
import os
import shutil
from typing import Union

import datasets
import pandas as pd
from datasets import load_dataset
10
+
11
def save_and_compress(dataset: "Union[datasets.Dataset, pd.DataFrame]", name: str, idx=None):
    """Write *dataset* as a JSON-lines file and produce an xz-compressed copy.

    Args:
        dataset: Any object exposing a pandas-style ``to_json`` method
            (e.g. a ``datasets.Dataset`` or ``pd.DataFrame``).
        name: Output path stem; data is written to ``{name}.jsonl``.
        idx: Optional shard index; when given, the file becomes
            ``{name}_{idx}.jsonl``.

    Side effects: writes ``<path>`` and ``<path>.xz`` to disk, keeping
    both files (mirrors the previous ``xz -k`` behaviour).
    """
    # Compare against None (not truthiness) so a shard index of 0 is honored;
    # the old `if idx:` silently dropped the suffix for idx == 0.
    if idx is not None:
        path = f"{name}_{idx}.jsonl"
    else:
        path = f"{name}.jsonl"

    print("Saving to", path)
    dataset.to_json(path, force_ascii=False, orient='records', lines=True)

    print("Compressing...")
    # Use the stdlib lzma module instead of `os.system(f'xz ... {path}')`:
    # no dependency on an external binary, no shell-quoting issues if `path`
    # contains spaces, and failures raise instead of being silently ignored.
    with open(path, 'rb') as src, lzma.open(f"{path}.xz", 'wb') as dst:
        shutil.copyfileobj(src, dst)
23
+
24
+
25
+
26
def get_dataset_column_from_text_folder(folder_path):
    """Read every text file under *folder_path* (one document per file) and
    return the documents as a pandas Series of strings ('text' column)."""
    text_dataset = load_dataset("text", data_dir=folder_path, sample_by="document", split='train')
    return text_dataset.to_pandas()['text']
28
+
29
+
30
# Build one JSONL file per split by merging the three source corpora.
# IN-Ext contributes no test split, so it is skipped for "test".
for split in ["train", "test"]:
    dfs = []
    for dataset_name in ["IN-Abs", "UK-Abs", "IN-Ext"]:
        if dataset_name == "IN-Ext" and split == "test":
            continue
        print(f"Processing {dataset_name} {split}")
        path = f"original_dataset/{dataset_name}/{split}-data"

        df = pd.DataFrame()
        # One row per case document; each file under judgement/ holds one full judgement.
        df['judgement'] = get_dataset_column_from_text_folder(f"{path}/judgement")
        df['dataset_name'] = dataset_name

        # Default layout: full summaries live directly under summary/.
        summary_full_path = f"{path}/summary"
        if dataset_name == "UK-Abs":
            # NOTE(review): nesting here assumes segment-wise summaries exist
            # only for the UK-Abs test split (summary/full + summary/segment-wise)
            # — confirm against the original Zenodo archive layout.
            if split == "test":
                summary_full_path = f"{path}/summary/full"
                for segment in ['background', 'judgement', 'reasons']:
                    df[f'summary/{segment}'] = get_dataset_column_from_text_folder(
                        f"{path}/summary/segment-wise/{segment}")
        elif dataset_name == "IN-Ext":
            summary_full_path = f"{path}/summary/full"
            # Two human annotators (A1, A2) each wrote segment-wise summaries.
            for annotator in ['A1', 'A2']:
                for segment in ['facts', 'judgement']: # errors when reading 'analysis' / 'argument' / 'statute'
                    print(f"Processing {dataset_name} {split} {annotator} {segment}")
                    df[f'summary/{annotator}/{segment}'] = get_dataset_column_from_text_folder(
                        f"{path}/summary/segment-wise/{annotator}/{segment}")
        # Every corpus/split provides a full summary; segment columns are NaN
        # where a corpus does not define them, since the frames are concatenated.
        df['summary/full'] = get_dataset_column_from_text_folder(summary_full_path)
        dfs.append(df)
    # Merge the per-corpus frames and write data/{split}.jsonl plus its .xz copy.
    df = pd.concat(dfs)
    save_and_compress(df, f"data/{split}")
test.jsonl.xz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:313dd03ac209312faa6ed75128d54878949bb8dad3fa8622c6936e6d5231dd41
3
+ size 2323776
train.jsonl.xz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc4714244b4cec524bfa56ee718f855d4d33e7a0fa3df595f9ce036ac5ac1ecc
3
+ size 50589476