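"""Convert the IN-Abs, UK-Abs and IN-Ext court-judgement summarization
datasets from folders of raw text files into one compressed JSONL file
per split."""
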
import os
from typing import Union

import datasets
import pandas as pd
from datasets import load_dataset


def save_and_compress(dataset: Union[datasets.Dataset, pd.DataFrame], name: str, idx=None):
    """Write `dataset` to a JSON Lines file, then compress it with xz (keeping the original)."""
    if idx is not None:  # `if idx:` would wrongly skip an index of 0
        path = f"{name}_{idx}.jsonl"
    else:
        path = f"{name}.jsonl"

    print("Saving to", path)
    dataset.to_json(path, force_ascii=False, orient='records', lines=True)

    print("Compressing...")
    os.system(f'xz -zkf -T0 {path}')  # -T0 to use multithreading


def get_dataset_column_from_text_folder(folder_path: str) -> pd.Series:
    """Load every text file under `folder_path` as one document per row and return the text column."""
    dataset = load_dataset("text", data_dir=folder_path, sample_by="document", split='train')
    return dataset.to_pandas()['text']


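# Directory layout assumed below (inferred from the paths used):
#   original_dataset/{dataset_name}/{split}-data/judgement/        one text file per case
#   original_dataset/{dataset_name}/{split}-data/summary/          full summaries (IN-Abs; UK-Abs train)
#   original_dataset/{dataset_name}/{split}-data/summary/full/     full summaries (UK-Abs test; IN-Ext)
#   original_dataset/{dataset_name}/{split}-data/summary/segment-wise/  per-segment summaries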
for split in ["train", "test"]:
    dfs = []
    for dataset_name in ["IN-Abs", "UK-Abs", "IN-Ext"]:
        if dataset_name == "IN-Ext" and split == "test":
            continue  # IN-Ext has no test split
        print(f"Processing {dataset_name} {split}")
        path = f"original_dataset/{dataset_name}/{split}-data"

        df = pd.DataFrame()
        df['judgement'] = get_dataset_column_from_text_folder(f"{path}/judgement")
        df['dataset_name'] = dataset_name

        summary_full_path = f"{path}/summary"
        if dataset_name == "UK-Abs":
            if split == "test":
                summary_full_path = f"{path}/summary/full"
                for segment in ['background', 'judgement', 'reasons']:
                    df[f'summary/{segment}'] = get_dataset_column_from_text_folder(
                        f"{path}/summary/segment-wise/{segment}")
        elif dataset_name == "IN-Ext":
            summary_full_path = f"{path}/summary/full"
            for annotator in ['A1', 'A2']:
                for segment in ['facts', 'judgement']:  # errors when reading 'analysis' / 'argument' / 'statute'
                    print(f"Processing {dataset_name} {split} {annotator} {segment}")
                    df[f'summary/{annotator}/{segment}'] = get_dataset_column_from_text_folder(
                        f"{path}/summary/segment-wise/{annotator}/{segment}")
        df['summary/full'] = get_dataset_column_from_text_folder(summary_full_path)
        dfs.append(df)
    df = pd.concat(dfs)
    save_and_compress(df, f"data/{split}")
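
# The compressed splits can be loaded back with pandas, which infers xz
# compression from the file extension:
#   pd.read_json("data/train.jsonl.xz", lines=True)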