# Provenance: Hugging Face dataset repository upload by Joelito (commit ee2ae9b),
# "uploaded dataset files". (Web-page residue converted to a comment so the file parses.)
import os
import shlex
from typing import Union

import datasets
import pandas as pd
from datasets import load_dataset
def save_and_compress(dataset: Union[datasets.Dataset, pd.DataFrame], name: str, idx=None):
    """Write `dataset` as JSON-lines to `{name}.jsonl` and produce an xz-compressed copy.

    Args:
        dataset: object exposing a pandas-style ``to_json`` method
            (a ``datasets.Dataset`` or a ``pd.DataFrame``).
        name: output path prefix, without extension (may include directories).
        idx: optional shard index; when given, the file is named ``{name}_{idx}.jsonl``.
    """
    # Compare against None (not truthiness) so shard index 0 still gets its suffix.
    if idx is not None:
        path = f"{name}_{idx}.jsonl"
    else:
        path = f"{name}.jsonl"
    print("Saving to", path)
    dataset.to_json(path, force_ascii=False, orient='records', lines=True)
    print("Compressing...")
    # -T0 enables xz multithreading; quote the path so spaces/shell
    # metacharacters cannot break (or inject into) the shell command.
    os.system(f'xz -zkf -T0 {shlex.quote(path)}')
def get_dataset_column_from_text_folder(folder_path):
    """Load every text file under `folder_path` (one file = one document) and
    return the resulting 'text' column as a pandas Series."""
    ds = load_dataset("text", data_dir=folder_path, sample_by="document", split='train')
    return ds.to_pandas()['text']
# Assemble one combined DataFrame per split from the three source corpora,
# then dump it as compressed JSON-lines under data/.
for split in ["train", "test"]:
    split_frames = []
    for dataset_name in ["IN-Abs", "UK-Abs", "IN-Ext"]:
        # IN-Ext ships no test split.
        if dataset_name == "IN-Ext" and split == "test":
            continue
        print(f"Processing {dataset_name} {split}")
        base_dir = f"original_dataset/{dataset_name}/{split}-data"

        frame = pd.DataFrame()
        frame['judgement'] = get_dataset_column_from_text_folder(f"{base_dir}/judgement")
        frame['dataset_name'] = dataset_name

        # Default location of the full summaries; some corpora override it below.
        summary_full_path = f"{base_dir}/summary"
        if dataset_name == "UK-Abs" and split == "test":
            # UK-Abs test data additionally provides segment-wise summaries.
            summary_full_path = f"{base_dir}/summary/full"
            for segment in ['background', 'judgement', 'reasons']:
                frame[f'summary/{segment}'] = get_dataset_column_from_text_folder(
                    f"{base_dir}/summary/segment-wise/{segment}")
        elif dataset_name == "IN-Ext":
            # IN-Ext has two annotators, each with segment-wise summaries.
            summary_full_path = f"{base_dir}/summary/full"
            for annotator in ['A1', 'A2']:
                for segment in ['facts', 'judgement']:  # errors when reading 'analysis' / 'argument' / 'statute'
                    print(f"Processing {dataset_name} {split} {annotator} {segment}")
                    frame[f'summary/{annotator}/{segment}'] = get_dataset_column_from_text_folder(
                        f"{base_dir}/summary/segment-wise/{annotator}/{segment}")

        frame['summary/full'] = get_dataset_column_from_text_folder(summary_full_path)
        split_frames.append(frame)

    df = pd.concat(split_frames)
    save_and_compress(df, f"data/{split}")